fix spelling error
Matt Mackall
r5666:9d6ad26f default
@@ -1,2022 +1,2022 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import re, lock, transaction, tempfile, stat, errno, ui
import os, revlog, time, util, extensions, hook

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._transref = self._lockref = self._wlockref = None

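    # changelog, manifest and dirstate are created lazily on first
    # access: __getattr__ is only invoked for attributes that do not
    # exist yet, and each branch below caches the instance on self so
    # later lookups bypass this hook entirely.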
    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog  # accessing this forces the changelog to load first
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

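    # Tag writing: local tags go to .hg/localtags in the local charset;
    # global tags are appended to .hgtags in UTF-8 and committed.  When
    # a parent is given, the tag is committed against that revision
    # instead of the dirstate parent.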
    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()
            self.hook('tag', node=hex(node), tag=name, local=local)

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
               (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)

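    # The tag table is built once and memoized in self.tagscache;
    # invalidate() resets it when the changelog changes.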
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

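    # Collect a (rev, node, .hgtags filenode) triple for each head,
    # ordered so the traversal ends at the tip; heads whose .hgtags
    # filenode matches a later head are dropped so each file revision
    # is read only once.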
    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

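    # branch.cache format: the first line is "<tip-hex> <tip-rev>" and
    # acts as a validity check; every following line is
    # "<node-hex> <branchname>", mapping each branch to its tip-most
    # changeset.  Any parse error simply invalidates the cache.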
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

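    # lookup() resolves a revision key in a fixed order: the special
    # names '.' and 'null', an exact changelog match, tag names, branch
    # names, and finally an unambiguous node-prefix match.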
    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

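    # _filter applies the [encode]/[decode] hgrc filters: each section
    # maps a file pattern to a shell command, and the first pattern
    # that matches the filename pipes the data through its command.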
    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        if "l" in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

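    # Transactions journal every store write and save a pre-transaction
    # copy of the dirstate to journal.dirstate; when the transaction
    # completes, aftertrans() renames the journal files to "undo" so
    # that rollback() can replay them later.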
    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None

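    # Locks are cached through weakrefs: while a caller still holds the
    # lock object, lock()/wlock() hand back the same instance, and the
    # lock is released automatically once the last reference dies.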
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

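    # filecommit returns the file node to record in the manifest: the
    # existing parent node when the file is unchanged, otherwise the
    # node of a freshly added filelog revision (with any copy/rename
    # metadata attached).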
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

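    # commit() is the common entry point for regular and raw commits:
    # it gathers the file lists (from the dirstate unless explicit
    # parents were passed), checks each file in via filecommit, writes
    # the new manifest, and finally adds the changelog entry, all under
    # wlock + lock inside a single transaction.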
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch %s" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    return None
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

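    # status() always returns a 7-tuple of sorted file lists:
    # (modified, added, removed, deleted, unknown, ignored, clean);
    # ignored and clean are only populated when explicitly requested.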
898 def status(self, node1=None, node2=None, files=[], match=util.always,
898 def status(self, node1=None, node2=None, files=[], match=util.always,
899 list_ignored=False, list_clean=False):
899 list_ignored=False, list_clean=False):
900 """return status of files between two nodes or node and working directory
900 """return status of files between two nodes or node and working directory
901
901
902 If node1 is None, use the first dirstate parent instead.
902 If node1 is None, use the first dirstate parent instead.
903 If node2 is None, compare node1 with working directory.
903 If node2 is None, compare node1 with working directory.
904 """
904 """
905
905
906 def fcmp(fn, getnode):
906 def fcmp(fn, getnode):
907 t1 = self.wread(fn)
907 t1 = self.wread(fn)
908 return self.file(fn).cmp(getnode(fn), t1)
908 return self.file(fn).cmp(getnode(fn), t1)
909
909
910 def mfmatches(node):
910 def mfmatches(node):
911 change = self.changelog.read(node)
911 change = self.changelog.read(node)
912 mf = self.manifest.read(change[0]).copy()
912 mf = self.manifest.read(change[0]).copy()
913 for fn in mf.keys():
913 for fn in mf.keys():
914 if not match(fn):
914 if not match(fn):
915 del mf[fn]
915 del mf[fn]
916 return mf
916 return mf
917
917
918 modified, added, removed, deleted, unknown = [], [], [], [], []
918 modified, added, removed, deleted, unknown = [], [], [], [], []
919 ignored, clean = [], []
919 ignored, clean = [], []
920
920
921 compareworking = False
921 compareworking = False
922 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
922 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
923 compareworking = True
923 compareworking = True
924
924
925 if not compareworking:
925 if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

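    # Illustrative usage sketch (hypothetical caller, not from this file):
    # the status tuple above is consumed positionally, roughly as
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(list_ignored=True, list_clean=True)
    #   for f in modified:
    #       ui.write("M %s\n" % f)
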
    def add(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

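    # Note: the (-rev, node) pairs above are a decorate-sort-undecorate
    # idiom; negating the revision makes a plain ascending sort yield heads
    # in descending revision order, e.g. revs [3, 7, 5] sort as
    # [(-7, n7), (-5, n5), (-3, n3)].
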
    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

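    # Worked example (illustrative): with linear history 0 <- 1 <- 2 plus a
    # second child 1 <- 3, all on one branch, the walk starts at tip 3 with
    # ancestors {1}; rev 2 is not an ancestor and is on the branch, so it
    # becomes a second head; rev 1 is an ancestor, so it is swapped out for
    # its own parents, and the walk continues toward the root.
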
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

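    # Example (illustrative): for a (top, bottom) pair ten generations
    # apart, the nodes sampled along the first-parent chain sit 1, 2, 4 and
    # 8 steps below top, so each list grows only logarithmically with the
    # distance; findincoming later binary-searches these samples for the
    # known/unknown boundary.
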
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

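    # Illustrative protocol trace (hedged): for a remote with unknown heads,
    # the code above first pulls linear segments via remote.branches(),
    # queues incomplete segments in `search`, then narrows each with
    # remote.between() until the earliest unknown changesets land in
    # `fetch` -- typically O(log n) round trips per divergent branch.
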
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

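    # Example (illustrative): after the pruning loop, `remain` holds exactly
    # the nodes the remote lacks; any node whose parents were both pruned is
    # the root of a missing subtree, so the returned `subset` of roots (plus
    # their descendants) is precisely what a push must transfer.
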
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

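    # Hedged usage sketch (hypothetical): callers typically do something like
    #
    #   other = hg.repository(ui, "http://example.com/repo")
    #   repo.pull(other)
    #
    # and receive whatever addchangegroup() reports (see below).
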
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

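    # Example (illustrative): if the remote has heads {A, B} and we push a
    # head descending only from A, `newheads` keeps two entries (our head
    # plus B) and no warning fires; pushing an unrelated head C instead
    # grows `newheads` to three and triggers the "push creates new remote
    # branches" abort unless -f is given.
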
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

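    # Stream format note (hedged summary): every chunk yielded by gengroup()
    # is framed by changegroup.chunkheader(), a 4-byte big-endian length that
    # includes the header itself, and each section (changelog, manifests,
    # then one group per file) ends with changegroup.closechunk(), a
    # zero-length marker.
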
1773 def changegroup(self, basenodes, source):
1773 def changegroup(self, basenodes, source):
1774 """Generate a changegroup of all nodes that we have that a recipient
1774 """Generate a changegroup of all nodes that we have that a recipient
1775 doesn't.
1775 doesn't.
1776
1776
1777 This is much easier than the previous function as we can assume that
1777 This is much easier than the previous function as we can assume that
1778 the recipient has any changenode we aren't sending them."""
1778 the recipient has any changenode we aren't sending them."""
1779
1779
1780 self.hook('preoutgoing', throw=True, source=source)
1780 self.hook('preoutgoing', throw=True, source=source)
1781
1781
1782 cl = self.changelog
1782 cl = self.changelog
1783 nodes = cl.nodesbetween(basenodes, None)[0]
1783 nodes = cl.nodesbetween(basenodes, None)[0]
1784 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1784 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1785 self.changegroupinfo(nodes)
1785 self.changegroupinfo(nodes)
1786
1786
1787 def identity(x):
1787 def identity(x):
1788 return x
1788 return x
1789
1789
1790 def gennodelst(revlog):
1790 def gennodelst(revlog):
1791 for r in xrange(0, revlog.count()):
1791 for r in xrange(0, revlog.count()):
1792 n = revlog.node(r)
1792 n = revlog.node(r)
1793 if revlog.linkrev(n) in revset:
1793 if revlog.linkrev(n) in revset:
1794 yield n
1794 yield n
1795
1795
1796 def changed_file_collector(changedfileset):
1796 def changed_file_collector(changedfileset):
1797 def collect_changed_files(clnode):
1797 def collect_changed_files(clnode):
1798 c = cl.read(clnode)
1798 c = cl.read(clnode)
1799 for fname in c[3]:
1799 for fname in c[3]:
1800 changedfileset[fname] = 1
1800 changedfileset[fname] = 1
1801 return collect_changed_files
1801 return collect_changed_files
1802
1802
1803 def lookuprevlink_func(revlog):
1803 def lookuprevlink_func(revlog):
1804 def lookuprevlink(n):
1804 def lookuprevlink(n):
1805 return cl.node(revlog.linkrev(n))
1805 return cl.node(revlog.linkrev(n))
1806 return lookuprevlink
1806 return lookuprevlink
1807
1807
1808 def gengroup():
1808 def gengroup():
1809 # construct a list of all changed files
1809 # construct a list of all changed files
1810 changedfiles = {}
1810 changedfiles = {}
1811
1811
1812 for chnk in cl.group(nodes, identity,
1812 for chnk in cl.group(nodes, identity,
1813 changed_file_collector(changedfiles)):
1813 changed_file_collector(changedfiles)):
1814 yield chnk
1814 yield chnk
1815 changedfiles = changedfiles.keys()
1815 changedfiles = changedfiles.keys()
1816 changedfiles.sort()
1816 changedfiles.sort()
1817
1817
1818 mnfst = self.manifest
1818 mnfst = self.manifest
1819 nodeiter = gennodelst(mnfst)
1819 nodeiter = gennodelst(mnfst)
1820 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1820 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1821 yield chnk
1821 yield chnk
1822
1822
1823 for fname in changedfiles:
1823 for fname in changedfiles:
1824 filerevlog = self.file(fname)
1824 filerevlog = self.file(fname)
1825 if filerevlog.count() == 0:
1825 if filerevlog.count() == 0:
1826 raise util.abort(_("empty or missing revlog for %s") % fname)
1826 raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

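        # fire the outgoing hook for the first node, then wrap the
        # generator in chunkbuffer, which presents it as a file-like
        # object the caller can read incrementally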
        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor
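            # cor/cnr are the highest local revision numbers before and
            # after the group was added, so the incoming changesets occupy
            # revisions cor+1 through cnr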

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
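            # file groups arrive as a filename chunk followed by that
            # file's revision chunks; an empty chunk terminates the list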
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
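        # illustrative: two heads added returns +3, one head removed
        # returns -2, so callers can recover the head delta from the value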


    def stream_in(self, remote):
        fp = remote.stream_out()
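        # the first line of the stream is a status code: 0 means ok,
        # 1 means the operation is forbidden, 2 means the remote lock
        # could not be taken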
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
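        # each file entry is a 'name\0size' line followed by exactly
        # size bytes of raw store data, copied verbatim via self.sopener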
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

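        # streaming is only attempted when no specific heads were
        # requested, since stream_in copies whole revlogs rather than
        # a selected subset of revisions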
        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True