--Setup, actual queries after the space
--create table fltrind (a integer,b integer)
-- Reset the demo table and remove both candidate indexes so the script is rerunnable.
-- NOTE(review): these DROP INDEX statements fail on a first run when the indexes
-- do not yet exist; on SQL Server 2016+ use DROP INDEX IF EXISTS.
TRUNCATE TABLE fltrind
-- Modern syntax: the old two-part "DROP INDEX table.index" form is deprecated.
DROP INDEX nf ON fltrind
DROP INDEX f ON fltrind
-- Seed 100 rows with a = 0..99; column b is left NULL in every one of them.
DECLARE @rownum integer
SET @rownum = 0
WHILE @rownum < 100
BEGIN
    INSERT INTO fltrind (a) VALUES (@rownum)
    SET @rownum = @rownum + 1
END
-- Insert the single row whose b is NOT NULL (the original literal 0900 is just
-- the integer 900 -- the leading zero carried no meaning).
INSERT INTO fltrind (a, b) VALUES (9000, 900)
-- Multiply the row count to 100,000 by self-joining the table. Explicit
-- CROSS JOIN replaces the error-prone implicit comma join. TOP without an
-- ORDER BY picks arbitrary rows, which is acceptable for this repro.
INSERT INTO fltrind (a, b)
SELECT TOP 100000 f1.a, f1.b
FROM fltrind AS f1
CROSS JOIN fltrind AS f2
-- Build both candidate indexes on b (each covering a):
-- nf indexes every row; f is filtered to the non-NULL rows only.
CREATE NONCLUSTERED INDEX nf ON fltrind (b) INCLUDE (a)
CREATE NONCLUSTERED INDEX f ON fltrind (b) INCLUDE (a) WHERE b IS NOT NULL
-- Refresh statistics so the optimizer costs both indexes on current data.
UPDATE STATISTICS fltrind WITH FULLSCAN
-- Phase 1: both indexes exist -- inspect the plan to see which index is chosen.
SELECT a FROM fltrind WHERE b IS NOT NULL
-- Drop the non-filtered index (modern syntax; "DROP INDEX table.index" is
-- deprecated) and refresh statistics before re-running the query.
DROP INDEX nf ON fltrind
UPDATE STATISTICS fltrind WITH FULLSCAN
-- Phase 2: only the filtered index f remains -- it should now appear in the plan.
SELECT a FROM fltrind WHERE b IS NOT NULL
Looking at the query plan, the filtered index is not used as long as the non-filtered index is present. Any idea why, and how can I get it to use the filtered index?
Dropping the non-filtered index `nf` makes the optimizer use the filtered index `f`.
In fact, increasing the row count so that more than 10% of the rows qualify results in a table scan once the non-filtered index is dropped.
Best Answer
Since SQL Server can skip the NULL rows to start the range scan, the cost of either index is identical, so the choice is basically a coin toss for the optimizer. Compare the plans in SQL Sentry Plan Explorer* — the default plan versus the plan produced when you hint the filtered index (click to enlarge):
Since it's a toss-up, I don't know what benefit you'd get out of forcing SQL Server to choose one of two equally valid options.
*
Disclaimer: I work for SQL Sentry.