localrepo: rename in-memory tag cache instance attributes (issue548)
Greg Ward
r9146:5614a628 default
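In short: this commit renames localrepository's in-memory tag caches, tagscache to _tags (tag name to node) and _tagstypecache to _tagtypes (tag name to 'global' or 'local'). A minimal sketch of how code that pokes at these caches adapts; the attribute names come from the diff below, while the ui/hg imports and the repository path are illustrative assumptions:

    # Python 2, matching the era of this codebase.
    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '.')  # assumption: cwd is a repository

    # before this commit:  repo.tagscache / repo._tagstypecache
    # after this commit:
    repo._tags = None       # tag name -> node; rebuilt lazily by _findtags()
    repo._tagtypes = None   # tag name -> 'global' or 'local'
    print len(repo.tags())  # next access repopulates both caches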
@@ -1,131 +1,131 @@ perf.py
# perf.py - performance test routines
'''helper extension to measure performance'''

from mercurial import cmdutil, match, commands
import time, os, sys

def timer(func):
    results = []
    begin = time.time()
    count = 0
    while 1:
        ostart = os.times()
        cstart = time.time()
        r = func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break
    if r:
        sys.stderr.write("! result: %s\n" % r)
    m = min(results)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (m[0], m[1] + m[2], m[1], m[2], count))

def perfwalk(ui, repo, *pats):
    try:
        m = cmdutil.match(repo, pats, {})
        timer(lambda: len(list(repo.dirstate.walk(m, True, False))))
    except:
        try:
            m = cmdutil.match(repo, pats, {})
            timer(lambda: len([b for a,b,c in repo.dirstate.statwalk([], m)]))
        except:
            timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))

def perfstatus(ui, repo, *pats):
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, False, False, False))))
    timer(lambda: sum(map(len, repo.status())))

def perfheads(ui, repo):
    timer(lambda: len(repo.changelog.heads()))

def perftags(ui, repo):
    import mercurial.changelog, mercurial.manifest
    def t():
        repo.changelog = mercurial.changelog.changelog(repo.sopener)
        repo.manifest = mercurial.manifest.manifest(repo.sopener)
-        repo.tagscache = None
+        repo._tags = None
        return len(repo.tags())
    timer(t)

def perfdirstate(ui, repo):
    "a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        "a" in repo.dirstate
    timer(d)

def perfdirstatedirs(ui, repo):
    "a" in repo.dirstate
    def d():
        "a" in repo.dirstate._dirs
        del repo.dirstate._dirs
    timer(d)

def perfmanifest(ui, repo):
    def d():
        t = repo.manifest.tip()
        m = repo.manifest.read(t)
        repo.manifest.mapcache = None
        repo.manifest._cache = None
    timer(d)

def perfindex(ui, repo):
    import mercurial.changelog
    def d():
        t = repo.changelog.tip()
        repo.changelog = mercurial.changelog.changelog(repo.sopener)
        repo.changelog._loadindexmap()
    timer(d)

def perfstartup(ui, repo):
    cmd = sys.argv[0]
    def d():
        os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
    timer(d)

def perfparents(ui, repo):
    nl = [repo.changelog.node(i) for i in xrange(1000)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)

def perflookup(ui, repo, rev):
    timer(lambda: len(repo.lookup(rev)))

def perflog(ui, repo):
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=[], date='', user=''))
    ui.popbuffer()

def perftemplating(ui, repo):
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
                               template='{date|shortdate} [{rev}:{node|short}]'
                               ' {author|person}: {desc|firstline}\n'))
    ui.popbuffer()

cmdtable = {
    'perflookup': (perflookup, []),
    'perfparents': (perfparents, []),
    'perfstartup': (perfstartup, []),
    'perfstatus': (perfstatus, []),
    'perfwalk': (perfwalk, []),
    'perfmanifest': (perfmanifest, []),
    'perfindex': (perfindex, []),
    'perfheads': (perfheads, []),
    'perftags': (perftags, []),
    'perfdirstate': (perfdirstate, []),
    'perfdirstatedirs': (perfdirstate, []),
    'perflog': (perflog, []),
    'perftemplating': (perftemplating, []),
}
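timer() calls the given function repeatedly until at least 3 seconds and 100 runs (or 10 seconds and 3 runs) have elapsed, then writes the best wall/combined/user/system times to stderr. A standalone usage sketch, assuming contrib/perf.py is importable (its own module-level imports still require Mercurial to be installed); the workload below is illustrative:

    import perf  # assumption: contrib/perf.py is on sys.path

    def work():
        sum(xrange(100000))  # returns None, so timer() prints no "! result" line

    perf.timer(work)
    # stderr gets one line shaped like:
    # ! wall <sec> comb <sec> user <sec> sys <sec> (best of <count>)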
@@ -1,2194 +1,2200 @@ localrepo.py
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

-        self.tagscache = None
-        self._tagstypecache = None
+        # These two define the set of tags for this repository. _tags
+        # maps tag name to node; _tagtypes maps tag name to 'global' or
+        # 'local'. (Global tags are defined by .hgtags across all
+        # heads, and local tags are defined in .hg/localtags.) They
+        # constitute the in-memory cache of tags.
+        self._tags = None
+        self._tagtypes = None
+
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
-                if self._tagstypecache and name in self._tagstypecache:
-                    old = self.tagscache.get(name, nullid)
+                if self._tagtypes and name in self._tagtypes:
+                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
-        if self.tagscache is None:
-            (self.tagscache, self._tagstypecache) = self._findtags()
+        if self._tags is None:
+            (self._tags, self._tagtypes) = self._findtags()

-        return self.tagscache
+        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    # silently ignore as pull -r might cause this
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        seen = set()
        f = None
        ctxs = []
        for node in self.heads():
            try:
                fnode = self[node].filenode('.hgtags')
            except error.LookupError:
                continue
            if fnode not in seen:
                seen.add(fnode)
                if not f:
                    f = self.filectx('.hgtags', fileid=fnode)
                else:
                    f = f.filectx(fnode)
                ctxs.append(f)

        # read the tags file from each head, ending with the tip
        for f in reversed(ctxs):
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        tags = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                tags[k] = n
        tags['tip'] = self.changelog.tip()
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

-        return self._tagstypecache.get(tagname)
+        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
-        self.tagscache = None
-        self._tagstypecache = None
+        self._tags = None
+        self._tagtypes = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
886 cctx = context.workingctx(self, (p1, p2), text, user, date,
892 cctx = context.workingctx(self, (p1, p2), text, user, date,
887 extra, changes)
893 extra, changes)
888 if editor:
894 if editor:
889 cctx._text = editor(self, cctx, subs)
895 cctx._text = editor(self, cctx, subs)
890
896
891 # commit subs
897 # commit subs
892 if subs:
898 if subs:
893 state = wctx.substate.copy()
899 state = wctx.substate.copy()
894 for s in subs:
900 for s in subs:
895 self.ui.status(_('committing subrepository %s\n') % s)
901 self.ui.status(_('committing subrepository %s\n') % s)
896 sr = wctx.sub(s).commit(cctx._text, user, date)
902 sr = wctx.sub(s).commit(cctx._text, user, date)
897 state[s] = (state[s][0], sr)
903 state[s] = (state[s][0], sr)
898 subrepo.writestate(self, state)
904 subrepo.writestate(self, state)
899
905
900 ret = self.commitctx(cctx, True)
906 ret = self.commitctx(cctx, True)
901
907
902 # update dirstate and mergestate
908 # update dirstate and mergestate
903 for f in changes[0] + changes[1]:
909 for f in changes[0] + changes[1]:
904 self.dirstate.normal(f)
910 self.dirstate.normal(f)
905 for f in changes[2]:
911 for f in changes[2]:
906 self.dirstate.forget(f)
912 self.dirstate.forget(f)
907 self.dirstate.setparents(ret)
913 self.dirstate.setparents(ret)
908 ms.reset()
914 ms.reset()
909
915
910 return ret
916 return ret
911
917
912 finally:
918 finally:
913 wlock.release()
919 wlock.release()

    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]
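
    # The two lines above are a decorate-sort-undecorate idiom: sorting on
    # (-rev, node) pairs yields newest-first order without a comparator.
    # A standalone sketch of the same trick (hypothetical data):
    #
    #   revs = {'a': 3, 'b': 7, 'c': 5}
    #   pairs = [(-revs[n], n) for n in revs]
    #   newest_first = [n for (r, n) in sorted(pairs)]  # ['b', 'c', 'a']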

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
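
    # between() samples each top..bottom chain at exponentially growing
    # distances below the top (steps 1, 2, 4, 8, ...), which is what lets
    # the discovery code below narrow long branches in a logarithmic
    # number of probes.  A standalone sketch of the sampling pattern
    # (hypothetical walk of 100 first-parents):
    #
    #   sampled, i, f = [], 0, 1
    #   for step in range(100):
    #       if i == f:
    #           sampled.append(step)
    #           f *= 2
    #       i += 1
    #   # sampled == [1, 2, 4, 8, 16, 32, 64]
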
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
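
        # A worked toy case for the pruning above (hypothetical nodes): if
        # a revlog's history is n1 -> n2 -> n3 and the recipient is known
        # to have n3, walking n3's ancestry drops n1 and n2 from the
        # missing set as well, since having a node implies having all of
        # its ancestors.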
1742
1748
1743 # This is a function generating function used to set up an environment
1749 # This is a function generating function used to set up an environment
1744 # for the inner function to execute in.
1750 # for the inner function to execute in.
1745 def manifest_and_file_collector(changedfileset):
1751 def manifest_and_file_collector(changedfileset):
1746 # This is an information gathering function that gathers
1752 # This is an information gathering function that gathers
1747 # information from each changeset node that goes out as part of
1753 # information from each changeset node that goes out as part of
1748 # the changegroup. The information gathered is a list of which
1754 # the changegroup. The information gathered is a list of which
1749 # manifest nodes are potentially required (the recipient may
1755 # manifest nodes are potentially required (the recipient may
1750 # already have them) and total list of all files which were
1756 # already have them) and total list of all files which were
1751 # changed in any changeset in the changegroup.
1757 # changed in any changeset in the changegroup.
1752 #
1758 #
1753 # We also remember the first changenode we saw any manifest
1759 # We also remember the first changenode we saw any manifest
1754 # referenced by so we can later determine which changenode 'owns'
1760 # referenced by so we can later determine which changenode 'owns'
1755 # the manifest.
1761 # the manifest.
1756 def collect_manifests_and_files(clnode):
1762 def collect_manifests_and_files(clnode):
1757 c = cl.read(clnode)
1763 c = cl.read(clnode)
1758 for f in c[3]:
1764 for f in c[3]:
1759 # This makes sure we keep only one string instance per
1765 # This makes sure we keep only one string instance per
1760 # filename.
1766 # filename.
1761 changedfileset.setdefault(f, f)
1767 changedfileset.setdefault(f, f)
1762 msng_mnfst_set.setdefault(c[0], clnode)
1768 msng_mnfst_set.setdefault(c[0], clnode)
1763 return collect_manifests_and_files
1769 return collect_manifests_and_files
1764
1770
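
The setdefault trick in collect_manifests_and_files is a manual form of string interning; a hedged sketch of the effect (toy names):

changedfileset = {}

def intern_name(f):
    # repeated names map to the first string object seen, so the
    # changegroup machinery holds one copy per distinct filename
    return changedfileset.setdefault(f, f)

a = intern_name('src/main.py')
b = intern_name(''.join(['src/', 'main.py']))  # equal, but a fresh object
assert a == b and a is b   # setdefault handed back the first instance
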
1765 # Figure out which manifest nodes (of the ones we think might be part
1771 # Figure out which manifest nodes (of the ones we think might be part
1766 # of the changegroup) the recipient must know about and remove them
1772 # of the changegroup) the recipient must know about and remove them
1767 # from the changegroup.
1773 # from the changegroup.
1768 def prune_manifests():
1774 def prune_manifests():
1769 has_mnfst_set = set()
1775 has_mnfst_set = set()
1770 for n in msng_mnfst_set:
1776 for n in msng_mnfst_set:
1771 # If a 'missing' manifest thinks it belongs to a changenode
1777 # If a 'missing' manifest thinks it belongs to a changenode
1772 # the recipient is assumed to have, obviously the recipient
1778 # the recipient is assumed to have, obviously the recipient
1773 # must have that manifest.
1779 # must have that manifest.
1774 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1780 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1775 if linknode in has_cl_set:
1781 if linknode in has_cl_set:
1776 has_mnfst_set.add(n)
1782 has_mnfst_set.add(n)
1777 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1783 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1778
1784
1779 # Use the information collected in collect_manifests_and_files to say
1785 # Use the information collected in collect_manifests_and_files to say
1780 # which changenode any manifestnode belongs to.
1786 # which changenode any manifestnode belongs to.
1781 def lookup_manifest_link(mnfstnode):
1787 def lookup_manifest_link(mnfstnode):
1782 return msng_mnfst_set[mnfstnode]
1788 return msng_mnfst_set[mnfstnode]
1783
1789
1784 # A function generating function that sets up the initial environment
1790 # A function generating function that sets up the initial environment
1785 # for the inner function.
1791 # for the inner function.
1786 def filenode_collector(changedfiles):
1792 def filenode_collector(changedfiles):
1787 next_rev = [0]
1793 next_rev = [0]
1788 # This gathers information from each manifestnode included in the
1794 # This gathers information from each manifestnode included in the
1789 # changegroup about which filenodes the manifest node references
1795 # changegroup about which filenodes the manifest node references
1790 # so we can include those in the changegroup too.
1796 # so we can include those in the changegroup too.
1791 #
1797 #
1792 # It also remembers which changenode each filenode belongs to. It
1798 # It also remembers which changenode each filenode belongs to. It
1793 # does this by assuming that a filenode belongs to the changenode
1799 # does this by assuming that a filenode belongs to the changenode
1794 # the first manifest that references it belongs to.
1800 # the first manifest that references it belongs to.
1795 def collect_msng_filenodes(mnfstnode):
1801 def collect_msng_filenodes(mnfstnode):
1796 r = mnfst.rev(mnfstnode)
1802 r = mnfst.rev(mnfstnode)
1797 if r == next_rev[0]:
1803 if r == next_rev[0]:
1798 # If this rev immediately follows the one we looked at last,
1804 # If this rev immediately follows the one we looked at last,
1799 # we only need to read the delta.
1805 # we only need to read the delta.
1800 deltamf = mnfst.readdelta(mnfstnode)
1806 deltamf = mnfst.readdelta(mnfstnode)
1801 # For each line in the delta
1807 # For each line in the delta
1802 for f, fnode in deltamf.iteritems():
1808 for f, fnode in deltamf.iteritems():
1803 f = changedfiles.get(f, None)
1809 f = changedfiles.get(f, None)
1804 # And if the file is in the list of files we care
1810 # And if the file is in the list of files we care
1805 # about.
1811 # about.
1806 if f is not None:
1812 if f is not None:
1807 # Get the changenode this manifest belongs to
1813 # Get the changenode this manifest belongs to
1808 clnode = msng_mnfst_set[mnfstnode]
1814 clnode = msng_mnfst_set[mnfstnode]
1809 # Create the set of filenodes for the file if
1815 # Create the set of filenodes for the file if
1810 # there isn't one already.
1816 # there isn't one already.
1811 ndset = msng_filenode_set.setdefault(f, {})
1817 ndset = msng_filenode_set.setdefault(f, {})
1812 # And set the filenode's changelog node to the
1818 # And set the filenode's changelog node to the
1813 # manifest's if it hasn't been set already.
1819 # manifest's if it hasn't been set already.
1814 ndset.setdefault(fnode, clnode)
1820 ndset.setdefault(fnode, clnode)
1815 else:
1821 else:
1816 # Otherwise we need a full manifest.
1822 # Otherwise we need a full manifest.
1817 m = mnfst.read(mnfstnode)
1823 m = mnfst.read(mnfstnode)
1818 # For every file we care about.
1824 # For every file we care about.
1819 for f in changedfiles:
1825 for f in changedfiles:
1820 fnode = m.get(f, None)
1826 fnode = m.get(f, None)
1821 # If it's in the manifest
1827 # If it's in the manifest
1822 if fnode is not None:
1828 if fnode is not None:
1823 # See comments above.
1829 # See comments above.
1824 clnode = msng_mnfst_set[mnfstnode]
1830 clnode = msng_mnfst_set[mnfstnode]
1825 ndset = msng_filenode_set.setdefault(f, {})
1831 ndset = msng_filenode_set.setdefault(f, {})
1826 ndset.setdefault(fnode, clnode)
1832 ndset.setdefault(fnode, clnode)
1827 # Remember the revision we hope to see next.
1833 # Remember the revision we hope to see next.
1828 next_rev[0] = r + 1
1834 next_rev[0] = r + 1
1829 return collect_msng_filenodes
1835 return collect_msng_filenodes
1830
1836
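
The next_rev one-element list above is the Python 2 idiom for a writable closure variable (there is no 'nonlocal'), and it drives the choice between a cheap readdelta() and a full read(). A standalone sketch, where read()/readdelta() are hypothetical stand-ins for the real manifest calls:

full = {0: {'a': 'n0', 'b': 'n1'}, 1: {'a': 'n0', 'b': 'n2'}}

def read(rev):
    return dict(full[rev])

def readdelta(rev):
    # entries that changed relative to rev - 1
    if rev == 0:
        return dict(full[0])
    prev = full[rev - 1]
    return dict((f, n) for f, n in full[rev].items() if prev.get(f) != n)

def collector():
    next_rev = [0]   # one-element list: writable from the closure
    def collect(rev):
        if rev == next_rev[0]:
            entries = readdelta(rev)   # sequential: cheap delta
        else:
            entries = read(rev)        # out of order: full read
        next_rev[0] = rev + 1
        return entries
    return collect

c = collector()
assert c(0) == {'a': 'n0', 'b': 'n1'}   # delta of rev 0 is everything
assert c(1) == {'b': 'n2'}              # still sequential: delta only
assert c(0) == {'a': 'n0', 'b': 'n1'}   # jumped back: full read
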
1831 # We have a list of filenodes we think we need for a file; let's remove
1837 # We have a list of filenodes we think we need for a file; let's remove
1832 # all those we know the recipient must have.
1838 # all those we know the recipient must have.
1833 def prune_filenodes(f, filerevlog):
1839 def prune_filenodes(f, filerevlog):
1834 msngset = msng_filenode_set[f]
1840 msngset = msng_filenode_set[f]
1835 hasset = set()
1841 hasset = set()
1836 # If a 'missing' filenode thinks it belongs to a changenode we
1842 # If a 'missing' filenode thinks it belongs to a changenode we
1837 # assume the recipient must have, then the recipient must have
1843 # assume the recipient must have, then the recipient must have
1838 # that filenode.
1844 # that filenode.
1839 for n in msngset:
1845 for n in msngset:
1840 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1846 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1841 if clnode in has_cl_set:
1847 if clnode in has_cl_set:
1842 hasset.add(n)
1848 hasset.add(n)
1843 prune_parents(filerevlog, hasset, msngset)
1849 prune_parents(filerevlog, hasset, msngset)
1844
1850
1845 # A function generating function that sets up a context for the
1851 # A function generating function that sets up a context for the
1846 # inner function.
1852 # inner function.
1847 def lookup_filenode_link_func(fname):
1853 def lookup_filenode_link_func(fname):
1848 msngset = msng_filenode_set[fname]
1854 msngset = msng_filenode_set[fname]
1849 # Lookup the changenode the filenode belongs to.
1855 # Lookup the changenode the filenode belongs to.
1850 def lookup_filenode_link(fnode):
1856 def lookup_filenode_link(fnode):
1851 return msngset[fnode]
1857 return msngset[fnode]
1852 return lookup_filenode_link
1858 return lookup_filenode_link
1853
1859
1854 # Add the nodes that were explicitly requested.
1860 # Add the nodes that were explicitly requested.
1855 def add_extra_nodes(name, nodes):
1861 def add_extra_nodes(name, nodes):
1856 if not extranodes or name not in extranodes:
1862 if not extranodes or name not in extranodes:
1857 return
1863 return
1858
1864
1859 for node, linknode in extranodes[name]:
1865 for node, linknode in extranodes[name]:
1860 if node not in nodes:
1866 if node not in nodes:
1861 nodes[node] = linknode
1867 nodes[node] = linknode
1862
1868
1863 # Now that we have all these utility functions to help out and
1869 # Now that we have all these utility functions to help out and
1864 # logically divide up the task, generate the group.
1870 # logically divide up the task, generate the group.
1865 def gengroup():
1871 def gengroup():
1866 # The set of changed files starts empty.
1872 # The set of changed files starts empty.
1867 changedfiles = {}
1873 changedfiles = {}
1868 # Create a changenode group generator that will call our functions
1874 # Create a changenode group generator that will call our functions
1869 # back to lookup the owning changenode and collect information.
1875 # back to lookup the owning changenode and collect information.
1870 group = cl.group(msng_cl_lst, identity,
1876 group = cl.group(msng_cl_lst, identity,
1871 manifest_and_file_collector(changedfiles))
1877 manifest_and_file_collector(changedfiles))
1872 for chnk in group:
1878 for chnk in group:
1873 yield chnk
1879 yield chnk
1874
1880
1875 # The list of manifests has been collected by the generator
1881 # The list of manifests has been collected by the generator
1876 # calling our functions back.
1882 # calling our functions back.
1877 prune_manifests()
1883 prune_manifests()
1878 add_extra_nodes(1, msng_mnfst_set)
1884 add_extra_nodes(1, msng_mnfst_set)
1879 msng_mnfst_lst = msng_mnfst_set.keys()
1885 msng_mnfst_lst = msng_mnfst_set.keys()
1880 # Sort the manifestnodes by revision number.
1886 # Sort the manifestnodes by revision number.
1881 msng_mnfst_lst.sort(key=mnfst.rev)
1887 msng_mnfst_lst.sort(key=mnfst.rev)
1882 # Create a generator for the manifestnodes that calls our lookup
1888 # Create a generator for the manifestnodes that calls our lookup
1883 # and data collection functions back.
1889 # and data collection functions back.
1884 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1890 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1885 filenode_collector(changedfiles))
1891 filenode_collector(changedfiles))
1886 for chnk in group:
1892 for chnk in group:
1887 yield chnk
1893 yield chnk
1888
1894
1889 # These are no longer needed; drop the references so the memory
1895 # These are no longer needed; drop the references so the memory
1890 # can be reclaimed.
1896 # can be reclaimed.
1891 msng_mnfst_lst = None
1897 msng_mnfst_lst = None
1892 msng_mnfst_set.clear()
1898 msng_mnfst_set.clear()
1893
1899
1894 if extranodes:
1900 if extranodes:
1895 for fname in extranodes:
1901 for fname in extranodes:
1896 if isinstance(fname, int):
1902 if isinstance(fname, int):
1897 continue
1903 continue
1898 msng_filenode_set.setdefault(fname, {})
1904 msng_filenode_set.setdefault(fname, {})
1899 changedfiles[fname] = 1
1905 changedfiles[fname] = 1
1900 # Go through all our files in order sorted by name.
1906 # Go through all our files in order sorted by name.
1901 for fname in sorted(changedfiles):
1907 for fname in sorted(changedfiles):
1902 filerevlog = self.file(fname)
1908 filerevlog = self.file(fname)
1903 if not len(filerevlog):
1909 if not len(filerevlog):
1904 raise util.Abort(_("empty or missing revlog for %s") % fname)
1910 raise util.Abort(_("empty or missing revlog for %s") % fname)
1905 # Toss out the filenodes that the recipient isn't really
1911 # Toss out the filenodes that the recipient isn't really
1906 # missing.
1912 # missing.
1907 if fname in msng_filenode_set:
1913 if fname in msng_filenode_set:
1908 prune_filenodes(fname, filerevlog)
1914 prune_filenodes(fname, filerevlog)
1909 add_extra_nodes(fname, msng_filenode_set[fname])
1915 add_extra_nodes(fname, msng_filenode_set[fname])
1910 msng_filenode_lst = msng_filenode_set[fname].keys()
1916 msng_filenode_lst = msng_filenode_set[fname].keys()
1911 else:
1917 else:
1912 msng_filenode_lst = []
1918 msng_filenode_lst = []
1913 # If any filenodes are left, generate the group for them,
1919 # If any filenodes are left, generate the group for them,
1914 # otherwise don't bother.
1920 # otherwise don't bother.
1915 if len(msng_filenode_lst) > 0:
1921 if len(msng_filenode_lst) > 0:
1916 yield changegroup.chunkheader(len(fname))
1922 yield changegroup.chunkheader(len(fname))
1917 yield fname
1923 yield fname
1918 # Sort the filenodes by their revision #
1924 # Sort the filenodes by their revision #
1919 msng_filenode_lst.sort(key=filerevlog.rev)
1925 msng_filenode_lst.sort(key=filerevlog.rev)
1920 # Create a group generator and only pass in a changenode
1926 # Create a group generator and only pass in a changenode
1921 # lookup function, as we don't need to collect any information
1927 # lookup function, as we don't need to collect any information
1922 # from filenodes.
1928 # from filenodes.
1923 group = filerevlog.group(msng_filenode_lst,
1929 group = filerevlog.group(msng_filenode_lst,
1924 lookup_filenode_link_func(fname))
1930 lookup_filenode_link_func(fname))
1925 for chnk in group:
1931 for chnk in group:
1926 yield chnk
1932 yield chnk
1927 if fname in msng_filenode_set:
1933 if fname in msng_filenode_set:
1928 # Don't need this anymore, toss it to free memory.
1934 # Don't need this anymore, toss it to free memory.
1929 del msng_filenode_set[fname]
1935 del msng_filenode_set[fname]
1930 # Signal that no more groups are left.
1936 # Signal that no more groups are left.
1931 yield changegroup.closechunk()
1937 yield changegroup.closechunk()
1932
1938
1933 if msng_cl_lst:
1939 if msng_cl_lst:
1934 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1940 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1935
1941
1936 return util.chunkbuffer(gengroup())
1942 return util.chunkbuffer(gengroup())
1937
1943
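
Everything yielded by gengroup() is framed in changegroup chunks. In the wire format of this era, each chunk carries a 4-byte big-endian length that counts the length field itself, and a zero-length chunk closes a group; a hedged sketch of that framing:

import struct
from io import BytesIO

def chunkheader(datalen):
    # the length prefix counts its own 4 bytes as well
    return struct.pack(">l", datalen + 4)

def closechunk():
    return struct.pack(">l", 0)   # an empty chunk terminates a group

def getchunk(fp):
    l = struct.unpack(">l", fp.read(4))[0]
    if l <= 4:
        return b""                # end-of-group marker
    return fp.read(l - 4)

stream = BytesIO(chunkheader(5) + b"hello" + closechunk())
assert getchunk(stream) == b"hello"
assert getchunk(stream) == b""
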
1938 def changegroup(self, basenodes, source):
1944 def changegroup(self, basenodes, source):
1939 # to avoid a race we use changegroupsubset() (issue1320)
1945 # to avoid a race we use changegroupsubset() (issue1320)
1940 return self.changegroupsubset(basenodes, self.heads(), source)
1946 return self.changegroupsubset(basenodes, self.heads(), source)
1941
1947
1942 def _changegroup(self, common, source):
1948 def _changegroup(self, common, source):
1943 """Generate a changegroup of all nodes that we have that a recipient
1949 """Generate a changegroup of all nodes that we have that a recipient
1944 doesn't.
1950 doesn't.
1945
1951
1946 This is much easier than the previous function as we can assume that
1952 This is much easier than the previous function as we can assume that
1947 the recipient has any changenode we aren't sending them.
1953 the recipient has any changenode we aren't sending them.
1948
1954
1949 common is the set of common nodes between remote and self"""
1955 common is the set of common nodes between remote and self"""
1950
1956
1951 self.hook('preoutgoing', throw=True, source=source)
1957 self.hook('preoutgoing', throw=True, source=source)
1952
1958
1953 cl = self.changelog
1959 cl = self.changelog
1954 nodes = cl.findmissing(common)
1960 nodes = cl.findmissing(common)
1955 revset = set([cl.rev(n) for n in nodes])
1961 revset = set([cl.rev(n) for n in nodes])
1956 self.changegroupinfo(nodes, source)
1962 self.changegroupinfo(nodes, source)
1957
1963
1958 def identity(x):
1964 def identity(x):
1959 return x
1965 return x
1960
1966
1961 def gennodelst(log):
1967 def gennodelst(log):
1962 for r in log:
1968 for r in log:
1963 if log.linkrev(r) in revset:
1969 if log.linkrev(r) in revset:
1964 yield log.node(r)
1970 yield log.node(r)
1965
1971
1966 def changed_file_collector(changedfileset):
1972 def changed_file_collector(changedfileset):
1967 def collect_changed_files(clnode):
1973 def collect_changed_files(clnode):
1968 c = cl.read(clnode)
1974 c = cl.read(clnode)
1969 changedfileset.update(c[3])
1975 changedfileset.update(c[3])
1970 return collect_changed_files
1976 return collect_changed_files
1971
1977
1972 def lookuprevlink_func(revlog):
1978 def lookuprevlink_func(revlog):
1973 def lookuprevlink(n):
1979 def lookuprevlink(n):
1974 return cl.node(revlog.linkrev(revlog.rev(n)))
1980 return cl.node(revlog.linkrev(revlog.rev(n)))
1975 return lookuprevlink
1981 return lookuprevlink
1976
1982
1977 def gengroup():
1983 def gengroup():
1978 # construct a list of all changed files
1984 # construct a list of all changed files
1979 changedfiles = set()
1985 changedfiles = set()
1980
1986
1981 for chnk in cl.group(nodes, identity,
1987 for chnk in cl.group(nodes, identity,
1982 changed_file_collector(changedfiles)):
1988 changed_file_collector(changedfiles)):
1983 yield chnk
1989 yield chnk
1984
1990
1985 mnfst = self.manifest
1991 mnfst = self.manifest
1986 nodeiter = gennodelst(mnfst)
1992 nodeiter = gennodelst(mnfst)
1987 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1993 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1988 yield chnk
1994 yield chnk
1989
1995
1990 for fname in sorted(changedfiles):
1996 for fname in sorted(changedfiles):
1991 filerevlog = self.file(fname)
1997 filerevlog = self.file(fname)
1992 if not len(filerevlog):
1998 if not len(filerevlog):
1993 raise util.Abort(_("empty or missing revlog for %s") % fname)
1999 raise util.Abort(_("empty or missing revlog for %s") % fname)
1994 nodeiter = gennodelst(filerevlog)
2000 nodeiter = gennodelst(filerevlog)
1995 nodeiter = list(nodeiter)
2001 nodeiter = list(nodeiter)
1996 if nodeiter:
2002 if nodeiter:
1997 yield changegroup.chunkheader(len(fname))
2003 yield changegroup.chunkheader(len(fname))
1998 yield fname
2004 yield fname
1999 lookup = lookuprevlink_func(filerevlog)
2005 lookup = lookuprevlink_func(filerevlog)
2000 for chnk in filerevlog.group(nodeiter, lookup):
2006 for chnk in filerevlog.group(nodeiter, lookup):
2001 yield chnk
2007 yield chnk
2002
2008
2003 yield changegroup.closechunk()
2009 yield changegroup.closechunk()
2004
2010
2005 if nodes:
2011 if nodes:
2006 self.hook('outgoing', node=hex(nodes[0]), source=source)
2012 self.hook('outgoing', node=hex(nodes[0]), source=source)
2007
2013
2008 return util.chunkbuffer(gengroup())
2014 return util.chunkbuffer(gengroup())
2009
2015
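
A toy sketch of the linkrev filter in gennodelst above: a revlog entry is outgoing exactly when the changeset that introduced it (its linkrev) is in the outgoing revision set (toy data, hypothetical names):

# rev -> (node, linkrev) for a toy filelog
toylog = {0: ('n0', 0), 1: ('n1', 2), 2: ('n2', 5)}
revset = {2, 5}   # outgoing changelog revisions

def outgoing_nodes(log):
    for r in sorted(log):
        node, linkrev = log[r]
        if linkrev in revset:
            yield node

assert list(outgoing_nodes(toylog)) == ['n1', 'n2']
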
2010 def addchangegroup(self, source, srctype, url, emptyok=False):
2016 def addchangegroup(self, source, srctype, url, emptyok=False):
2011 """add changegroup to repo.
2017 """add changegroup to repo.
2012
2018
2013 return values:
2019 return values:
2014 - nothing changed or no source: 0
2020 - nothing changed or no source: 0
2015 - more heads than before: 1+added heads (2..n)
2021 - more heads than before: 1+added heads (2..n)
2016 - fewer heads than before: -1-removed heads (-2..-n)
2022 - fewer heads than before: -1-removed heads (-2..-n)
2017 - number of heads stays the same: 1
2023 - number of heads stays the same: 1
2018 """
2024 """
2019 def csmap(x):
2025 def csmap(x):
2020 self.ui.debug(_("add changeset %s\n") % short(x))
2026 self.ui.debug(_("add changeset %s\n") % short(x))
2021 return len(cl)
2027 return len(cl)
2022
2028
2023 def revmap(x):
2029 def revmap(x):
2024 return cl.rev(x)
2030 return cl.rev(x)
2025
2031
2026 if not source:
2032 if not source:
2027 return 0
2033 return 0
2028
2034
2029 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2035 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2030
2036
2031 changesets = files = revisions = 0
2037 changesets = files = revisions = 0
2032
2038
2033 # write changelog data to temp files so concurrent readers will not see
2039 # write changelog data to temp files so concurrent readers will not see
2034 # inconsistent view
2040 # inconsistent view
2035 cl = self.changelog
2041 cl = self.changelog
2036 cl.delayupdate()
2042 cl.delayupdate()
2037 oldheads = len(cl.heads())
2043 oldheads = len(cl.heads())
2038
2044
2039 tr = self.transaction()
2045 tr = self.transaction()
2040 try:
2046 try:
2041 trp = weakref.proxy(tr)
2047 trp = weakref.proxy(tr)
2042 # pull off the changeset group
2048 # pull off the changeset group
2043 self.ui.status(_("adding changesets\n"))
2049 self.ui.status(_("adding changesets\n"))
2044 clstart = len(cl)
2050 clstart = len(cl)
2045 chunkiter = changegroup.chunkiter(source)
2051 chunkiter = changegroup.chunkiter(source)
2046 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2052 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2047 raise util.Abort(_("received changelog group is empty"))
2053 raise util.Abort(_("received changelog group is empty"))
2048 clend = len(cl)
2054 clend = len(cl)
2049 changesets = clend - clstart
2055 changesets = clend - clstart
2050
2056
2051 # pull off the manifest group
2057 # pull off the manifest group
2052 self.ui.status(_("adding manifests\n"))
2058 self.ui.status(_("adding manifests\n"))
2053 chunkiter = changegroup.chunkiter(source)
2059 chunkiter = changegroup.chunkiter(source)
2054 # no need to check for empty manifest group here:
2060 # no need to check for empty manifest group here:
2055 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2061 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2056 # no new manifest will be created and the manifest group will
2062 # no new manifest will be created and the manifest group will
2057 # be empty during the pull
2063 # be empty during the pull
2058 self.manifest.addgroup(chunkiter, revmap, trp)
2064 self.manifest.addgroup(chunkiter, revmap, trp)
2059
2065
2060 # process the files
2066 # process the files
2061 self.ui.status(_("adding file changes\n"))
2067 self.ui.status(_("adding file changes\n"))
2062 while 1:
2068 while 1:
2063 f = changegroup.getchunk(source)
2069 f = changegroup.getchunk(source)
2064 if not f:
2070 if not f:
2065 break
2071 break
2066 self.ui.debug(_("adding %s revisions\n") % f)
2072 self.ui.debug(_("adding %s revisions\n") % f)
2067 fl = self.file(f)
2073 fl = self.file(f)
2068 o = len(fl)
2074 o = len(fl)
2069 chunkiter = changegroup.chunkiter(source)
2075 chunkiter = changegroup.chunkiter(source)
2070 if fl.addgroup(chunkiter, revmap, trp) is None:
2076 if fl.addgroup(chunkiter, revmap, trp) is None:
2071 raise util.Abort(_("received file revlog group is empty"))
2077 raise util.Abort(_("received file revlog group is empty"))
2072 revisions += len(fl) - o
2078 revisions += len(fl) - o
2073 files += 1
2079 files += 1
2074
2080
2075 newheads = len(cl.heads())
2081 newheads = len(cl.heads())
2076 heads = ""
2082 heads = ""
2077 if oldheads and newheads != oldheads:
2083 if oldheads and newheads != oldheads:
2078 heads = _(" (%+d heads)") % (newheads - oldheads)
2084 heads = _(" (%+d heads)") % (newheads - oldheads)
2079
2085
2080 self.ui.status(_("added %d changesets"
2086 self.ui.status(_("added %d changesets"
2081 " with %d changes to %d files%s\n")
2087 " with %d changes to %d files%s\n")
2082 % (changesets, revisions, files, heads))
2088 % (changesets, revisions, files, heads))
2083
2089
2084 if changesets > 0:
2090 if changesets > 0:
2085 p = lambda: cl.writepending() and self.root or ""
2091 p = lambda: cl.writepending() and self.root or ""
2086 self.hook('pretxnchangegroup', throw=True,
2092 self.hook('pretxnchangegroup', throw=True,
2087 node=hex(cl.node(clstart)), source=srctype,
2093 node=hex(cl.node(clstart)), source=srctype,
2088 url=url, pending=p)
2094 url=url, pending=p)
2089
2095
2090 # make changelog see real files again
2096 # make changelog see real files again
2091 cl.finalize(trp)
2097 cl.finalize(trp)
2092
2098
2093 tr.close()
2099 tr.close()
2094 finally:
2100 finally:
2095 del tr
2101 del tr
2096
2102
2097 if changesets > 0:
2103 if changesets > 0:
2098 # forcefully update the on-disk branch cache
2104 # forcefully update the on-disk branch cache
2099 self.ui.debug(_("updating the branch cache\n"))
2105 self.ui.debug(_("updating the branch cache\n"))
2100 self.branchtags()
2106 self.branchtags()
2101 self.hook("changegroup", node=hex(cl.node(clstart)),
2107 self.hook("changegroup", node=hex(cl.node(clstart)),
2102 source=srctype, url=url)
2108 source=srctype, url=url)
2103
2109
2104 for i in xrange(clstart, clend):
2110 for i in xrange(clstart, clend):
2105 self.hook("incoming", node=hex(cl.node(i)),
2111 self.hook("incoming", node=hex(cl.node(i)),
2106 source=srctype, url=url)
2112 source=srctype, url=url)
2107
2113
2108 # never return 0 here:
2114 # never return 0 here:
2109 if newheads < oldheads:
2115 if newheads < oldheads:
2110 return newheads - oldheads - 1
2116 return newheads - oldheads - 1
2111 else:
2117 else:
2112 return newheads - oldheads + 1
2118 return newheads - oldheads + 1
2113
2119
2114
2120
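
The head-count bookkeeping at the end of the method encodes its result so callers can always treat 0 as "nothing happened"; a small sketch of just that arithmetic:

def headcode(oldheads, newheads):
    # mirror of the 'never return 0 here' logic above
    if newheads < oldheads:
        return newheads - oldheads - 1   # heads removed: -2..-n
    return newheads - oldheads + 1       # unchanged: 1, added: 2..n

assert headcode(3, 3) == 1     # same number of heads
assert headcode(1, 3) == 3     # two heads added   -> 1 + 2
assert headcode(3, 1) == -3    # two heads removed -> -1 - 2
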
2115 def stream_in(self, remote):
2121 def stream_in(self, remote):
2116 fp = remote.stream_out()
2122 fp = remote.stream_out()
2117 l = fp.readline()
2123 l = fp.readline()
2118 try:
2124 try:
2119 resp = int(l)
2125 resp = int(l)
2120 except ValueError:
2126 except ValueError:
2121 raise error.ResponseError(
2127 raise error.ResponseError(
2122 _('Unexpected response from remote server:'), l)
2128 _('Unexpected response from remote server:'), l)
2123 if resp == 1:
2129 if resp == 1:
2124 raise util.Abort(_('operation forbidden by server'))
2130 raise util.Abort(_('operation forbidden by server'))
2125 elif resp == 2:
2131 elif resp == 2:
2126 raise util.Abort(_('locking the remote repository failed'))
2132 raise util.Abort(_('locking the remote repository failed'))
2127 elif resp != 0:
2133 elif resp != 0:
2128 raise util.Abort(_('the server sent an unknown error code'))
2134 raise util.Abort(_('the server sent an unknown error code'))
2129 self.ui.status(_('streaming all changes\n'))
2135 self.ui.status(_('streaming all changes\n'))
2130 l = fp.readline()
2136 l = fp.readline()
2131 try:
2137 try:
2132 total_files, total_bytes = map(int, l.split(' ', 1))
2138 total_files, total_bytes = map(int, l.split(' ', 1))
2133 except (ValueError, TypeError):
2139 except (ValueError, TypeError):
2134 raise error.ResponseError(
2140 raise error.ResponseError(
2135 _('Unexpected response from remote server:'), l)
2141 _('Unexpected response from remote server:'), l)
2136 self.ui.status(_('%d files to transfer, %s of data\n') %
2142 self.ui.status(_('%d files to transfer, %s of data\n') %
2137 (total_files, util.bytecount(total_bytes)))
2143 (total_files, util.bytecount(total_bytes)))
2138 start = time.time()
2144 start = time.time()
2139 for i in xrange(total_files):
2145 for i in xrange(total_files):
2140 # XXX doesn't support '\n' or '\r' in filenames
2146 # XXX doesn't support '\n' or '\r' in filenames
2141 l = fp.readline()
2147 l = fp.readline()
2142 try:
2148 try:
2143 name, size = l.split('\0', 1)
2149 name, size = l.split('\0', 1)
2144 size = int(size)
2150 size = int(size)
2145 except (ValueError, TypeError):
2151 except (ValueError, TypeError):
2146 raise error.ResponseError(
2152 raise error.ResponseError(
2147 _('Unexpected response from remote server:'), l)
2153 _('Unexpected response from remote server:'), l)
2148 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2154 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2149 # for backwards compat, name was partially encoded
2155 # for backwards compat, name was partially encoded
2150 ofp = self.sopener(store.decodedir(name), 'w')
2156 ofp = self.sopener(store.decodedir(name), 'w')
2151 for chunk in util.filechunkiter(fp, limit=size):
2157 for chunk in util.filechunkiter(fp, limit=size):
2152 ofp.write(chunk)
2158 ofp.write(chunk)
2153 ofp.close()
2159 ofp.close()
2154 elapsed = time.time() - start
2160 elapsed = time.time() - start
2155 if elapsed <= 0:
2161 if elapsed <= 0:
2156 elapsed = 0.001
2162 elapsed = 0.001
2157 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2163 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2158 (util.bytecount(total_bytes), elapsed,
2164 (util.bytecount(total_bytes), elapsed,
2159 util.bytecount(total_bytes / elapsed)))
2165 util.bytecount(total_bytes / elapsed)))
2160 self.invalidate()
2166 self.invalidate()
2161 return len(self.heads()) + 1
2167 return len(self.heads()) + 1
2162
2168
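
The stream_out response parsed above is line-oriented: a status-code line, a "total_files total_bytes" line, then for each file a "name\0size" line followed by exactly size raw bytes (the real code streams them through util.filechunkiter rather than one read). A hedged standalone parser over a toy payload:

from io import BytesIO

def parse_stream(fp):
    resp = int(fp.readline().decode())
    if resp != 0:
        raise ValueError("server refused stream: %d" % resp)
    header = fp.readline().decode()
    total_files, total_bytes = map(int, header.split(' ', 1))
    files = {}
    for _ in range(total_files):
        name, size = fp.readline().decode().split('\0', 1)
        files[name] = fp.read(int(size))
    return files

wire = BytesIO(b"0\n2 11\na\x006\nfoobar" + b"b\x005\nquux!")
assert parse_stream(wire) == {'a': b'foobar', 'b': b'quux!'}
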
2163 def clone(self, remote, heads=[], stream=False):
2169 def clone(self, remote, heads=[], stream=False):
2164 '''clone remote repository.
2170 '''clone remote repository.
2165
2171
2166 keyword arguments:
2172 keyword arguments:
2167 heads: list of revs to clone (forces use of pull)
2173 heads: list of revs to clone (forces use of pull)
2168 stream: use streaming clone if possible'''
2174 stream: use streaming clone if possible'''
2169
2175
2170 # now, all clients that can request uncompressed clones can
2176 # now, all clients that can request uncompressed clones can
2171 # read repo formats supported by all servers that can serve
2177 # read repo formats supported by all servers that can serve
2172 # them.
2178 # them.
2173
2179
2174 # if revlog format changes, client will have to check version
2180 # if revlog format changes, client will have to check version
2175 # and format flags on "stream" capability, and use
2181 # and format flags on "stream" capability, and use
2176 # uncompressed only if compatible.
2182 # uncompressed only if compatible.
2177
2183
2178 if stream and not heads and remote.capable('stream'):
2184 if stream and not heads and remote.capable('stream'):
2179 return self.stream_in(remote)
2185 return self.stream_in(remote)
2180 return self.pull(remote, heads)
2186 return self.pull(remote, heads)
2181
2187
2182 # used to avoid circular references so destructors work
2188 # used to avoid circular references so destructors work
2183 def aftertrans(files):
2189 def aftertrans(files):
2184 renamefiles = [tuple(t) for t in files]
2190 renamefiles = [tuple(t) for t in files]
2185 def a():
2191 def a():
2186 for src, dest in renamefiles:
2192 for src, dest in renamefiles:
2187 util.rename(src, dest)
2193 util.rename(src, dest)
2188 return a
2194 return a
2189
2195
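
aftertrans captures only plain tuples in a closure, so the returned callback keeps no reference back to the repository and destructors can still run; the same pattern in a runnable sketch:

import os, tempfile

def after_rename(files):
    # capture plain tuples only: no reference to any large object
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            os.rename(src, dest)
    return a

d = tempfile.mkdtemp()
src, dest = os.path.join(d, "journal"), os.path.join(d, "undo")
open(src, "w").close()
cb = after_rename([(src, dest)])
cb()   # performs the deferred renames
assert os.path.exists(dest) and not os.path.exists(src)
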
2190 def instance(ui, path, create):
2196 def instance(ui, path, create):
2191 return localrepository(ui, util.drop_scheme('file', path), create)
2197 return localrepository(ui, util.drop_scheme('file', path), create)
2192
2198
2193 def islocal(path):
2199 def islocal(path):
2194 return True
2200 return True
@@ -1,134 +1,134 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2, incorporated herein by reference.
8 # GNU General Public License version 2, incorporated herein by reference.
9
9
10 from i18n import _
10 from i18n import _
11 import changelog, byterange, url, error
11 import changelog, byterange, url, error
12 import localrepo, manifest, util, store
12 import localrepo, manifest, util, store
13 import urllib, urllib2, errno
13 import urllib, urllib2, errno
14
14
15 class httprangereader(object):
15 class httprangereader(object):
16 def __init__(self, url, opener):
16 def __init__(self, url, opener):
17 # we assume opener has HTTPRangeHandler
17 # we assume opener has HTTPRangeHandler
18 self.url = url
18 self.url = url
19 self.pos = 0
19 self.pos = 0
20 self.opener = opener
20 self.opener = opener
21 def seek(self, pos):
21 def seek(self, pos):
22 self.pos = pos
22 self.pos = pos
23 def read(self, bytes=None):
23 def read(self, bytes=None):
24 req = urllib2.Request(self.url)
24 req = urllib2.Request(self.url)
25 end = ''
25 end = ''
26 if bytes:
26 if bytes:
27 end = self.pos + bytes - 1
27 end = self.pos + bytes - 1
28 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
28 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
29
29
30 try:
30 try:
31 f = self.opener.open(req)
31 f = self.opener.open(req)
32 data = f.read()
32 data = f.read()
33 if hasattr(f, 'getcode'):
33 if hasattr(f, 'getcode'):
34 # python 2.6+
34 # python 2.6+
35 code = f.getcode()
35 code = f.getcode()
36 elif hasattr(f, 'code'):
36 elif hasattr(f, 'code'):
37 # undocumented attribute, seems to be set in 2.4 and 2.5
37 # undocumented attribute, seems to be set in 2.4 and 2.5
38 code = f.code
38 code = f.code
39 else:
39 else:
40 # Don't know how to check, hope for the best.
40 # Don't know how to check, hope for the best.
41 code = 206
41 code = 206
42 except urllib2.HTTPError, inst:
42 except urllib2.HTTPError, inst:
43 num = inst.code == 404 and errno.ENOENT or None
43 num = inst.code == 404 and errno.ENOENT or None
44 raise IOError(num, inst)
44 raise IOError(num, inst)
45 except urllib2.URLError, inst:
45 except urllib2.URLError, inst:
46 raise IOError(None, inst.reason[1])
46 raise IOError(None, inst.reason[1])
47
47
48 if code == 200:
48 if code == 200:
49 # HTTPRangeHandler does nothing if remote does not support
49 # HTTPRangeHandler does nothing if remote does not support
50 # Range headers and returns the full entity. Let's slice it.
50 # Range headers and returns the full entity. Let's slice it.
51 if bytes:
51 if bytes:
52 data = data[self.pos:self.pos + bytes]
52 data = data[self.pos:self.pos + bytes]
53 else:
53 else:
54 data = data[self.pos:]
54 data = data[self.pos:]
55 elif bytes:
55 elif bytes:
56 data = data[:bytes]
56 data = data[:bytes]
57 self.pos += len(data)
57 self.pos += len(data)
58 return data
58 return data
59
59
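
The slicing at the end of read() papers over servers that ignore the Range header and reply 200 with the full entity; a standalone sketch of just that logic:

def slice_response(data, pos, nbytes, code):
    # code 200: server ignored Range and sent the full entity
    if code == 200:
        if nbytes:
            return data[pos:pos + nbytes]
        return data[pos:]
    # code 206: server honoured Range; trim any overshoot
    if nbytes:
        return data[:nbytes]
    return data

body = b"0123456789"
assert slice_response(body, 4, 3, 200) == b"456"    # full entity, sliced
assert slice_response(b"456", 4, 3, 206) == b"456"  # partial reply kept
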
60 def build_opener(ui, authinfo):
60 def build_opener(ui, authinfo):
61 # urllib cannot handle URLs with embedded user or passwd
61 # urllib cannot handle URLs with embedded user or passwd
62 urlopener = url.opener(ui, authinfo)
62 urlopener = url.opener(ui, authinfo)
63 urlopener.add_handler(byterange.HTTPRangeHandler())
63 urlopener.add_handler(byterange.HTTPRangeHandler())
64
64
65 def opener(base):
65 def opener(base):
66 """return a function that opens files over http"""
66 """return a function that opens files over http"""
67 p = base
67 p = base
68 def o(path, mode="r"):
68 def o(path, mode="r"):
69 f = "/".join((p, urllib.quote(path)))
69 f = "/".join((p, urllib.quote(path)))
70 return httprangereader(f, urlopener)
70 return httprangereader(f, urlopener)
71 return o
71 return o
72
72
73 return opener
73 return opener
74
74
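
The inner o() above builds each request URL by quoting the repo-relative path and joining it onto the fixed base; a small sketch of that joining (the URL is a placeholder):

try:
    from urllib.parse import quote   # Python 3
except ImportError:
    from urllib import quote         # Python 2

def join_url(base, path):
    # mirror the opener above: quote the repo-relative path and
    # append it to the fixed base
    return "/".join((base, quote(path)))

assert join_url("http://example.com/.hg", "store/data/a b.i") == \
       "http://example.com/.hg/store/data/a%20b.i"
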
75 class statichttprepository(localrepo.localrepository):
75 class statichttprepository(localrepo.localrepository):
76 def __init__(self, ui, path):
76 def __init__(self, ui, path):
77 self._url = path
77 self._url = path
78 self.ui = ui
78 self.ui = ui
79
79
80 self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")
80 self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")
81
81
82 opener = build_opener(ui, authinfo)
82 opener = build_opener(ui, authinfo)
83 self.opener = opener(self.path)
83 self.opener = opener(self.path)
84
84
85 # find requirements
85 # find requirements
86 try:
86 try:
87 requirements = self.opener("requires").read().splitlines()
87 requirements = self.opener("requires").read().splitlines()
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91 # check if it is a non-empty old-style repository
91 # check if it is a non-empty old-style repository
92 try:
92 try:
93 self.opener("00changelog.i").read(1)
93 self.opener("00changelog.i").read(1)
94 except IOError, inst:
94 except IOError, inst:
95 if inst.errno != errno.ENOENT:
95 if inst.errno != errno.ENOENT:
96 raise
96 raise
97 # we do not care about empty old-style repositories here
97 # we do not care about empty old-style repositories here
98 msg = _("'%s' does not appear to be an hg repository") % path
98 msg = _("'%s' does not appear to be an hg repository") % path
99 raise error.RepoError(msg)
99 raise error.RepoError(msg)
100 requirements = []
100 requirements = []
101
101
102 # check them
102 # check them
103 for r in requirements:
103 for r in requirements:
104 if r not in self.supported:
104 if r not in self.supported:
105 raise error.RepoError(_("requirement '%s' not supported") % r)
105 raise error.RepoError(_("requirement '%s' not supported") % r)
106
106
107 # setup store
107 # setup store
108 def pjoin(a, b):
108 def pjoin(a, b):
109 return a + '/' + b
109 return a + '/' + b
110 self.store = store.store(requirements, self.path, opener, pjoin)
110 self.store = store.store(requirements, self.path, opener, pjoin)
111 self.spath = self.store.path
111 self.spath = self.store.path
112 self.sopener = self.store.opener
112 self.sopener = self.store.opener
113 self.sjoin = self.store.join
113 self.sjoin = self.store.join
114
114
115 self.manifest = manifest.manifest(self.sopener)
115 self.manifest = manifest.manifest(self.sopener)
116 self.changelog = changelog.changelog(self.sopener)
116 self.changelog = changelog.changelog(self.sopener)
117 self.tagscache = None
117 self._tags = None
118 self.nodetagscache = None
118 self.nodetagscache = None
119 self.encodepats = None
119 self.encodepats = None
120 self.decodepats = None
120 self.decodepats = None
121
121
122 def url(self):
122 def url(self):
123 return self._url
123 return self._url
124
124
125 def local(self):
125 def local(self):
126 return False
126 return False
127
127
128 def lock(self, wait=True):
128 def lock(self, wait=True):
129 raise util.Abort(_('cannot lock static-http repository'))
129 raise util.Abort(_('cannot lock static-http repository'))
130
130
131 def instance(ui, path, create):
131 def instance(ui, path, create):
132 if create:
132 if create:
133 raise util.Abort(_('cannot create new static-http repository'))
133 raise util.Abort(_('cannot create new static-http repository'))
134 return statichttprepository(ui, path[7:])
134 return statichttprepository(ui, path[7:])