wwrite: simplify with util.set_flags
Matt Mackall
r5703:14789f30 default
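wwrite() previously branched on the symlink flag itself and used util.set_exec,
which only knows about the executable bit. This change funnels both the "x"
(executable) and "l" (symlink) flags through a single util.set_flags call; the
rewritten function appears in the hunk below. The helper's implementation is
not part of this diff. As a rough, hypothetical sketch only (not the actual
mercurial/util.py code), a set_flags along these lines would reconcile the
on-disk file with the requested flags:

import os, stat

def set_flags(f, flags):
    # hypothetical sketch: make the on-disk state of f match the
    # manifest flags "x" (executable) and "l" (symlink)
    s = os.lstat(f).st_mode
    if "l" in flags:
        if not stat.S_ISLNK(s):
            # turn the regular file into a symlink to its own content
            data = open(f).read()
            os.unlink(f)
            os.symlink(data, f)
        return # symlinks carry no permission bits
    if stat.S_ISLNK(s):
        # turn the symlink into a regular file holding its target
        data = os.readlink(f)
        os.unlink(f)
        open(f, "w").write(data)
        s = os.lstat(f).st_mode
    if "x" in flags:
        os.chmod(f, s | 0111)  # set the execute bits
    else:
        os.chmod(f, s & 0666)  # clear the execute bits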
@@ -1,2026 +1,2022 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import re, lock, transaction, tempfile, stat, errno, ui
import os, revlog, time, util, extensions, hook

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath),
                                          self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self._transref = self._lockref = self._wlockref = None

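    # lazily build the expensive changelog, manifest and dirstate
    # objects on first access; assigning the result as an instance
    # attribute means __getattr__ is consulted only once per attribute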
    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()
            self.hook('tag', node=hex(node), tag=name, local=local)

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(name, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

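    # walk the heads, ending with the tip, collecting the .hgtags
    # filenode of each; duplicate filenodes are dropped so every
    # distinct .hgtags revision is read at most once by tags() above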
    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

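    # the branch.cache file holds a "tip-hex tip-rev" header line,
    # followed by one "node-hex branchname" line per branch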
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
-        if "l" in flags:
-            self.wopener.symlink(data, filename)
-        else:
-            try:
-                if self._link(filename):
-                    os.unlink(self.wjoin(filename))
-            except OSError:
-                pass
-            self.wopener(filename, 'w').write(data)
-            util.set_exec(self.wjoin(filename), "x" in flags)
+        try:
+            os.unlink(self.wjoin(filename))
+        except OSError:
+            pass
+        self.wopener(filename, 'w').write(data)
+        util.set_flags(self.wjoin(filename), flags)

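(Review note: the old wwrite special-cased symlinks up front and used
util.set_exec, which only handles the executable bit. The rewritten version
always removes any existing working-directory file first, ignoring a failed
unlink, writes the filtered data as a regular file, and then hands both the
"x" and "l" flags to util.set_flags in one step, presumably converting the
freshly written file into a symlink when "l" is set.)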
    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

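    # transactions write to "journal"; aftertrans renames it to "undo"
    # on close so rollback() can revert the last transaction, while an
    # interrupted run leaves "journal" behind for recover()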
    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None

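    # try a non-blocking acquire first; if the lock is held and wait is
    # requested, retry with the configured ui.timeout (600s by default)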
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

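    # commit(): decide what to commit, store each file revision with
    # filecommit(), build the new manifest, then add the changeset and
    # run the pretxncommit/commit hooks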
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch %s" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    return None
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
                valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

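    # status() returns a 7-tuple of sorted filename lists:
    # (modified, added, removed, deleted, unknown, ignored, clean)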
898 def status(self, node1=None, node2=None, files=[], match=util.always,
894 def status(self, node1=None, node2=None, files=[], match=util.always,
899 list_ignored=False, list_clean=False):
895 list_ignored=False, list_clean=False):
900 """return status of files between two nodes or node and working directory
896 """return status of files between two nodes or node and working directory
901
897
902 If node1 is None, use the first dirstate parent instead.
898 If node1 is None, use the first dirstate parent instead.
903 If node2 is None, compare node1 with working directory.
899 If node2 is None, compare node1 with working directory.
904 """
900 """
905
901
906 def fcmp(fn, getnode):
902 def fcmp(fn, getnode):
907 t1 = self.wread(fn)
903 t1 = self.wread(fn)
908 return self.file(fn).cmp(getnode(fn), t1)
904 return self.file(fn).cmp(getnode(fn), t1)
909
905
910 def mfmatches(node):
906 def mfmatches(node):
911 change = self.changelog.read(node)
907 change = self.changelog.read(node)
912 mf = self.manifest.read(change[0]).copy()
908 mf = self.manifest.read(change[0]).copy()
913 for fn in mf.keys():
909 for fn in mf.keys():
914 if not match(fn):
910 if not match(fn):
915 del mf[fn]
911 del mf[fn]
916 return mf
912 return mf
917
913
918 modified, added, removed, deleted, unknown = [], [], [], [], []
914 modified, added, removed, deleted, unknown = [], [], [], [], []
919 ignored, clean = [], []
915 ignored, clean = [], []
920
916
921 compareworking = False
917 compareworking = False
922 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
918 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
923 compareworking = True
919 compareworking = True
924
920
925 if not compareworking:
921 if not compareworking:
926 # read the manifest from node1 before the manifest from node2,
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

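    # Illustrative sketch (not part of the original source): callers unpack
    # the seven sorted lists that status() above returns.  The keyword names
    # used here are assumptions inferred from the parameters visible in the
    # method body:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_ignored=True,
    #                                           list_clean=True)
    #   for f in modified:
    #       ui.write("M %s\n" % f)
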
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

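    # Illustration (not from the original source): heads() uses a
    # decorate-sort-undecorate pass with negated revision numbers to get a
    # newest-first ordering.  E.g. for heads at revs 2, 7 and 5:
    #
    #   [(-2, na), (-7, nb), (-5, nc)]  --sort-->  [(-7, nb), (-5, nc), (-2, na)]
    #
    # so the returned nodes come out as [nb, nc, na], i.e. revs 7, 5, 2.
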
    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

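    # Worked example (illustrative only) of the branchheads() walk above on a
    # small history, where revision 4 is the tip of branch "b":
    #
    #   0 -- 1 -- 2 (b) -- 4 (b, tip of b)
    #          \-- 3 (b)
    #
    # heads starts as [4] and ancestors as the parents of 4, i.e. {2}.
    # Visiting rev 3: it is not in ancestors but is on branch "b", so it
    # becomes a second head and its parent 1 joins ancestors.  Visiting
    # rev 2: it is in ancestors, so it is replaced there by its parent 1.
    # Revs 1 and 0 are ancestors, so the result is the nodes for revs [4, 3].
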
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

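    # Illustration (not in the original source): for each (top, bottom) pair,
    # between() walks first parents from top and records the nodes at
    # distances 1, 2, 4, 8, ... (the i == f check, with f doubling).  A toy
    # version of the distance pattern:
    #
    #   sampled, i, f = [], 0, 1
    #   while i < 20:
    #       if i == f:
    #           sampled.append(i)
    #           f = f * 2
    #       i += 1
    #   # sampled == [1, 2, 4, 8, 16]
    #
    # This exponentially spaced sample is what lets findincoming() below run
    # a binary-search-style narrowing over a branch.
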
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but where no child exists in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

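    # Illustration (not from the original source) of the narrowing step in
    # findincoming() above: remote.between() returns samples spaced at
    # distances 1, 2, 4, ... from the unknown head, so each pass either pins
    # the earliest missing changeset (f <= 2 means the remaining gap is at
    # most one node wide) or re-queues the span (p, i) between the last
    # unknown sample and the first known one.  Against, say, 1000 linear
    # missing changesets this costs O(log n) between() round trips rather
    # than one query per changeset.
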
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

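    # Sketch (illustrative only): findoutgoing() above starts from every node
    # we have, deletes everything reachable from the common set 'base', and
    # keeps the survivors whose parents were all pruned.  For a history
    #
    #   A -- B -- C      with base = {B}
    #         \-- D
    #
    # B and A are pruned, while C and D both survive with pruned parents, so
    # the returned roots are [C, D].
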
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

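    # Note (illustrative, not from the original source): the capability probe
    # above is the entire strategy switch.  When the server advertises
    # 'unbundle' it applies the bundle itself and can detect commit/push
    # races; otherwise we must take the remote lock ourselves, e.g.:
    #
    #   strategy = remote.capable('unbundle') and 'unbundle' or 'addchangegroup'
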
    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

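    # Worked example (illustrative only) for the new-head check in prepush()
    # above.  Suppose the remote has heads [r1, r2] and we push one outgoing
    # head h descended from r1.  Then r1 is dropped from newheads (h is its
    # descendant) while r2 is kept, giving newheads = [h, r2]: the same count
    # as remote_heads, so no warning.  Pushing a second head h2 also based on
    # r1 would give [h, h2, r2], which exceeds the two remote heads and
    # triggers "push creates new remote branches!".
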
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if msng_filenode_lst:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

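    # Sketch (illustrative, not in the original source) of the linkrev-based
    # pruning used by prune_manifests() and prune_filenodes() above: a node is
    # dropped from the "missing" set as soon as the changeset that introduced
    # it (cl.node(revlog.linkrev(n))) is one the recipient already has, and
    # prune_parents() then removes all of its ancestors too:
    #
    #   for n in msngset.keys():
    #       if cl.node(revlog.linkrev(n)) in has_cl_set:
    #           hasset[n] = 1   # recipient provably has this node
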
1777 def changegroup(self, basenodes, source):
1773 def changegroup(self, basenodes, source):
1778 """Generate a changegroup of all nodes that we have that a recipient
1774 """Generate a changegroup of all nodes that we have that a recipient
1779 doesn't.
1775 doesn't.
1780
1776
1781 This is much easier than the previous function as we can assume that
1777 This is much easier than the previous function as we can assume that
1782 the recipient has any changenode we aren't sending them."""
1778 the recipient has any changenode we aren't sending them."""
1783
1779
1784 self.hook('preoutgoing', throw=True, source=source)
1780 self.hook('preoutgoing', throw=True, source=source)
1785
1781
1786 cl = self.changelog
1782 cl = self.changelog
1787 nodes = cl.nodesbetween(basenodes, None)[0]
1783 nodes = cl.nodesbetween(basenodes, None)[0]
1788 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1784 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1789 self.changegroupinfo(nodes)
1785 self.changegroupinfo(nodes)
1790
1786
1791 def identity(x):
1787 def identity(x):
1792 return x
1788 return x
1793
1789
1794 def gennodelst(revlog):
1790 def gennodelst(revlog):
1795 for r in xrange(0, revlog.count()):
1791 for r in xrange(0, revlog.count()):
1796 n = revlog.node(r)
1792 n = revlog.node(r)
1797 if revlog.linkrev(n) in revset:
1793 if revlog.linkrev(n) in revset:
1798 yield n
1794 yield n
1799
1795
1800 def changed_file_collector(changedfileset):
1796 def changed_file_collector(changedfileset):
1801 def collect_changed_files(clnode):
1797 def collect_changed_files(clnode):
1802 c = cl.read(clnode)
1798 c = cl.read(clnode)
1803 for fname in c[3]:
1799 for fname in c[3]:
1804 changedfileset[fname] = 1
1800 changedfileset[fname] = 1
1805 return collect_changed_files
1801 return collect_changed_files
1806
1802
1807 def lookuprevlink_func(revlog):
1803 def lookuprevlink_func(revlog):
1808 def lookuprevlink(n):
1804 def lookuprevlink(n):
1809 return cl.node(revlog.linkrev(n))
1805 return cl.node(revlog.linkrev(n))
1810 return lookuprevlink
1806 return lookuprevlink
1811
1807
1812 def gengroup():
1808 def gengroup():
1813 # construct a list of all changed files
1809 # construct a list of all changed files
1814 changedfiles = {}
1810 changedfiles = {}
1815
1811
1816 for chnk in cl.group(nodes, identity,
1812 for chnk in cl.group(nodes, identity,
1817 changed_file_collector(changedfiles)):
1813 changed_file_collector(changedfiles)):
1818 yield chnk
1814 yield chnk
1819 changedfiles = changedfiles.keys()
1815 changedfiles = changedfiles.keys()
1820 changedfiles.sort()
1816 changedfiles.sort()
1821
1817
1822 mnfst = self.manifest
1818 mnfst = self.manifest
1823 nodeiter = gennodelst(mnfst)
1819 nodeiter = gennodelst(mnfst)
1824 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1820 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1825 yield chnk
1821 yield chnk
1826
1822
1827 for fname in changedfiles:
1823 for fname in changedfiles:
1828 filerevlog = self.file(fname)
1824 filerevlog = self.file(fname)
1829 if filerevlog.count() == 0:
1825 if filerevlog.count() == 0:
1830 raise util.Abort(_("empty or missing revlog for %s") % fname)
1826 raise util.Abort(_("empty or missing revlog for %s") % fname)
1831 nodeiter = gennodelst(filerevlog)
1827 nodeiter = gennodelst(filerevlog)
1832 nodeiter = list(nodeiter)
1828 nodeiter = list(nodeiter)
1833 if nodeiter:
1829 if nodeiter:
1834 yield changegroup.chunkheader(len(fname))
1830 yield changegroup.chunkheader(len(fname))
1835 yield fname
1831 yield fname
1836 lookup = lookuprevlink_func(filerevlog)
1832 lookup = lookuprevlink_func(filerevlog)
1837 for chnk in filerevlog.group(nodeiter, lookup):
1833 for chnk in filerevlog.group(nodeiter, lookup):
1838 yield chnk
1834 yield chnk
1839
1835
1840 yield changegroup.closechunk()
1836 yield changegroup.closechunk()
1841
1837
1842 if nodes:
1838 if nodes:
1843 self.hook('outgoing', node=hex(nodes[0]), source=source)
1839 self.hook('outgoing', node=hex(nodes[0]), source=source)
1844
1840
1845 return util.chunkbuffer(gengroup())
1841 return util.chunkbuffer(gengroup())
1846
1842
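The generator above frames everything it yields with changegroup.chunkheader() and changegroup.closechunk(). As a rough illustration of that length-prefixed framing (a self-contained sketch, not the changegroup module's actual code; the +4 accounting is an assumption about the on-wire format), consider:

import io
import struct

def chunkheader(datalen):
    # 4-byte big-endian length that counts the header bytes themselves
    return struct.pack(">l", datalen + 4)

def closechunk():
    # a zero-length chunk terminates a group
    return struct.pack(">l", 0)

def readchunks(fp):
    # inverse of the framing: yield payloads until the terminator
    while True:
        l = struct.unpack(">l", fp.read(4))[0]
        if l <= 4:
            break
        yield fp.read(l - 4)

stream = io.BytesIO(chunkheader(5) + b"hello" + closechunk())
assert list(readchunks(stream)) == [b"hello"]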
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
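The return-value convention in the docstring reduces to a small pure function. This sketch (headcode is a name invented here, not repo API) makes the encoding and its never-zero guarantee explicit:

def headcode(oldheads, newheads):
    # never returns 0, so callers can distinguish a no-op (0) from success
    if newheads < oldheads:
        return newheads - oldheads - 1   # heads removed: -2 .. -n
    return newheads - oldheads + 1       # same count -> 1, added -> 2 .. n

assert headcode(3, 3) == 1    # head count unchanged
assert headcode(1, 3) == 3    # two heads added
assert headcode(3, 1) == -3   # two heads removed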
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
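For reference, the payload consumed above has a simple shape: two header lines (a status code, then "<files> <bytes>"), followed by one "<name>\0<size>\n" announcement per file and exactly <size> bytes of raw store data. A minimal stand-alone parser over that shape (parse_stream and the sample bytes are invented for this sketch):

import io

def parse_stream(fp):
    # yield (name, data) pairs until the stream is exhausted
    while True:
        l = fp.readline()
        if not l:
            break
        name, size = l.split(b"\0", 1)
        yield name, fp.read(int(size))

payload = io.BytesIO(b"data/foo.i\x005\nhello"
                     b"data/bar.i\x000\n")
assert list(parse_stream(payload)) == [(b"data/foo.i", b"hello"),
                                       (b"data/bar.i", b"")]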
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
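If the "stream" capability ever grows the version and format flags mentioned above, the client-side check could stay a one-liner over the capability string. A sketch, assuming a space-separated list of "name" and "name=value" entries (the "stream=1" form is hypothetical here):

def capvalue(caps, name):
    # True for a bare capability, its value for "name=value", else False
    for cap in caps.split():
        if cap == name:
            return True
        if cap.startswith(name + "="):
            return cap.split("=", 1)[1]
    return False

caps = "lookup changegroupsubset stream=1"
assert capvalue(caps, "lookup") is True
assert capvalue(caps, "stream") == "1"
assert capvalue(caps, "unbundle") is False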
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
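The closure matters because of how CPython of this era collects garbage: in Python 2, a reference cycle passing through an object with a __del__ method is never freed (it lands in gc.garbage), so if the post-transaction callback kept a reference back to the transaction, say as a bound method, the destructor that triggers these renames would silently never run. A toy illustration of the safe shape (the Tx class is invented for this sketch):

class Tx(object):
    def __init__(self, after):
        self.after = after   # plain closure, no reference back to Tx
    def __del__(self):
        self.after()

log = []
t = Tx(lambda: log.append("renamed"))
del t                        # refcount hits zero, __del__ runs promptly
assert log == ["renamed"]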
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
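These two module-level hooks are what a repository backend exposes to the scheme dispatcher: instance() constructs the repo and islocal() says whether it lives on the local filesystem. A sketch of the dispatch shape they plug into (the table and scheme_of helper are placeholders, not the real mapping in mercurial.hg):

schemes = {
    'file': 'localrepo',   # this module
    'http': 'httprepo',
}

def scheme_of(path):
    # naive: everything before the first ':' is the scheme, default 'file'
    i = path.find(':')
    return path[:i] if i > 0 else 'file'

assert scheme_of('/srv/repo') == 'file'
assert scheme_of('http://example.com/repo') == 'http'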